extern void hypervisor_callback(void);
extern void failsafe_callback(void);
extern void smp_trap_init(trap_info_t *);
- int i;
cpu = ++cpucount;
/* FPU is set up to default initial state. */
memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
- /* Virtual IDT is empty at start-of-day. */
- for ( i = 0; i < 256; i++ )
- {
- ctxt.trap_ctxt[i].vector = i;
- ctxt.trap_ctxt[i].cs = FLAT_KERNEL_CS;
- }
smp_trap_init(ctxt.trap_ctxt);
/* No LDT. */
local_setup_timer_irq();
}
-static atomic_t vcpus_rebooting;
-
-static void restore_vcpu_ready(void)
+void vcpu_prepare(int vcpu)
{
+ extern void hypervisor_callback(void);
+ extern void failsafe_callback(void);
+ extern void smp_trap_init(trap_info_t *);
+ extern void cpu_restore(void);
+ vcpu_guest_context_t ctxt;
+ struct task_struct *idle = idle_task(vcpu);
- atomic_dec(&vcpus_rebooting);
-}
+ if (vcpu == 0)
+ return;
-void save_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt)
-{
- int r;
- int gdt_pages;
- r = HYPERVISOR_vcpu_pickle(vcpu, ctxt);
- if (r != 0)
- panic("pickling vcpu %d -> %d!\n", vcpu, r);
-
- /* Translate from machine to physical addresses where necessary,
- so that they can be translated to our new machine address space
- after resume. libxc is responsible for doing this to vcpu0,
- but we do it to the others. */
- gdt_pages = (ctxt->gdt_ents + 511) / 512;
- ctxt->ctrlreg[3] = machine_to_phys(ctxt->ctrlreg[3]);
- for (r = 0; r < gdt_pages; r++)
- ctxt->gdt_frames[r] = mfn_to_pfn(ctxt->gdt_frames[r]);
-}
+ memset(&ctxt, 0, sizeof(ctxt));
-int restore_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt)
-{
- int r;
- int gdt_pages = (ctxt->gdt_ents + 511) / 512;
-
- /* This is kind of a hack, and implicitly relies on the fact that
- the vcpu stops in a place where all of the call clobbered
- registers are already dead. */
- ctxt->user_regs.esp -= 4;
- ((unsigned long *)ctxt->user_regs.esp)[0] = ctxt->user_regs.eip;
- ctxt->user_regs.eip = (unsigned long)restore_vcpu_ready;
-
- /* De-canonicalise. libxc handles this for vcpu 0, but we need
- to do it for the other vcpus. */
- ctxt->ctrlreg[3] = phys_to_machine(ctxt->ctrlreg[3]);
- for (r = 0; r < gdt_pages; r++)
- ctxt->gdt_frames[r] = pfn_to_mfn(ctxt->gdt_frames[r]);
-
- atomic_set(&vcpus_rebooting, 1);
- r = HYPERVISOR_boot_vcpu(vcpu, ctxt);
- if (r != 0) {
- printk(KERN_EMERG "Failed to reboot vcpu %d (%d)\n", vcpu, r);
- return -1;
- }
+ ctxt.user_regs.ds = __USER_DS;
+ ctxt.user_regs.es = __USER_DS;
+ ctxt.user_regs.fs = 0;
+ ctxt.user_regs.gs = 0;
+ ctxt.user_regs.ss = __KERNEL_DS;
+ ctxt.user_regs.cs = __KERNEL_CS;
+ ctxt.user_regs.eip = (unsigned long)cpu_restore;
+ ctxt.user_regs.esp = idle->thread.esp;
+ ctxt.user_regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_IOPL_RING1;
- /* Make sure we wait for the new vcpu to come up before trying to do
- anything with it or starting the next one. */
- while (atomic_read(&vcpus_rebooting))
- barrier();
+ memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
- return 0;
+ smp_trap_init(ctxt.trap_ctxt);
+
+ ctxt.ldt_ents = 0;
+
+ ctxt.gdt_frames[0] = virt_to_mfn(cpu_gdt_descr[vcpu].address);
+ ctxt.gdt_ents = cpu_gdt_descr[vcpu].size / 8;
+
+ ctxt.kernel_ss = __KERNEL_DS;
+ ctxt.kernel_sp = idle->thread.esp0;
+
+ ctxt.event_callback_cs = __KERNEL_CS;
+ ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
+ ctxt.failsafe_callback_cs = __KERNEL_CS;
+ ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
+
+ ctxt.ctrlreg[3] = virt_to_mfn(swapper_pg_dir) << PAGE_SHIFT;
+
+ (void)HYPERVISOR_boot_vcpu(vcpu, &ctxt);
}
extern unsigned long *pfn_to_mfn_frame_list[];
#ifdef CONFIG_SMP
- static vcpu_guest_context_t suspended_cpu_records[NR_CPUS];
- cpumask_t prev_online_cpus, prev_present_cpus;
-
- void save_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
- int restore_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
+ cpumask_t prev_online_cpus;
+ int vcpu_prepare(int vcpu);
#endif
extern void xencons_resume(void);
preempt_enable();
-#ifdef CONFIG_SMP
- cpus_clear(prev_present_cpus);
- for_each_present_cpu(i) {
- if (i == 0)
- continue;
- save_vcpu_context(i, &suspended_cpu_records[i]);
- cpu_set(i, prev_present_cpus);
- }
-#endif
-
gnttab_suspend();
#ifdef __i386__
time_resume();
-#ifdef CONFIG_SMP
- for_each_cpu_mask(i, prev_present_cpus)
- restore_vcpu_context(i, &suspended_cpu_records[i]);
-#endif
-
__sti();
xencons_resume();
xenbus_resume();
#ifdef CONFIG_SMP
+ for_each_present_cpu(i)
+ vcpu_prepare(i);
+
out_reenable_cpus:
for_each_cpu_mask(i, prev_online_cpus) {
j = cpu_up(i);